#define PREFIX "ACPI: "
-void (*pm_idle) (void);
+void (*pm_idle) (void) __read_mostly;
EXPORT_SYMBOL(pm_idle);
-void (*pm_power_off) (void);
+void (*pm_power_off) (void) __read_mostly;
EXPORT_SYMBOL(pm_power_off);
unsigned int acpi_cpei_override;
#endif
#ifdef XEN
-nodemask_t node_online_map = { { [0] = 1UL } };
+nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
#endif
u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
static void default_idle(void);
static void default_dead_idle(void);
-void (*pm_idle) (void) = default_idle;
-void (*dead_idle) (void) = default_dead_idle;
+void (*pm_idle) (void) __read_mostly = default_idle;
+void (*dead_idle) (void) __read_mostly = default_dead_idle;
static void paravirt_ctxt_switch_from(struct vcpu *v);
static void paravirt_ctxt_switch_to(struct vcpu *v);
return dmi_bigsmp;
}
-struct genapic apic_bigsmp = {
+const struct genapic apic_bigsmp = {
APIC_INIT("bigsmp", probe_bigsmp),
GENAPIC_PHYS
};
return 1;
}
-struct genapic apic_default = {
+const struct genapic apic_default = {
APIC_INIT("default", probe_default),
GENAPIC_FLAT
};
#include <xen/kernel.h>
#include <xen/ctype.h>
#include <xen/init.h>
+#include <asm/cache.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/mach-generic/mach_apic.h>
#include <asm/setup.h>
-extern struct genapic apic_summit;
-extern struct genapic apic_bigsmp;
-extern struct genapic apic_default;
+extern const struct genapic apic_summit;
+extern const struct genapic apic_bigsmp;
+extern const struct genapic apic_default;
-struct genapic *genapic;
+const struct genapic *__read_mostly genapic;
-struct genapic *apic_probe[] __initdata = {
+const struct genapic *apic_probe[] __initdata = {
&apic_summit,
&apic_bigsmp,
&apic_default, /* must be last */
return 0;
}
-struct genapic apic_summit = {
+const struct genapic apic_summit = {
APIC_INIT("summit", probe_summit),
GENAPIC_PHYS
};
#include <xen/smp.h>
#include <asm/mach-default/mach_mpparse.h>
-static int x2apic = 1;
+static int __initdata x2apic = 1;
boolean_param("x2apic", x2apic);
-static int x2apic_phys = 0; /* By default we use logical cluster mode. */
+static int __initdata x2apic_phys; /* By default we use logical cluster mode. */
boolean_param("x2apic_phys", x2apic_phys);
static int __init probe_x2apic_phys(void)
iommu_supports_eim();
}
-struct genapic apic_x2apic_phys= {
+const struct genapic apic_x2apic_phys = {
APIC_INIT("x2apic_phys", probe_x2apic_phys),
GENAPIC_X2APIC_PHYS
};
-struct genapic apic_x2apic_cluster= {
+const struct genapic apic_x2apic_cluster = {
APIC_INIT("x2apic_cluster", probe_x2apic_cluster),
GENAPIC_X2APIC_CLUSTER
};
return rc;
}
-static struct x86_emulate_ops hvm_emulate_ops = {
+static const struct x86_emulate_ops hvm_emulate_ops = {
.read = hvmemul_read,
.insn_fetch = hvmemul_insn_fetch,
.write = hvmemul_write,
(addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)));
}
-struct hvm_mmio_handler hpet_mmio_handler = {
+const struct hvm_mmio_handler hpet_mmio_handler = {
.check_handler = hpet_range,
.read_handler = hpet_read,
.write_handler = hpet_write
#include <xen/event.h>
#include <xen/iommu.h>
-extern struct hvm_mmio_handler hpet_mmio_handler;
-extern struct hvm_mmio_handler vlapic_mmio_handler;
-extern struct hvm_mmio_handler vioapic_mmio_handler;
-extern struct hvm_mmio_handler msixtbl_mmio_handler;
+extern const struct hvm_mmio_handler hpet_mmio_handler;
+extern const struct hvm_mmio_handler vlapic_mmio_handler;
+extern const struct hvm_mmio_handler vioapic_mmio_handler;
+extern const struct hvm_mmio_handler msixtbl_mmio_handler;
#define HVM_MMIO_HANDLER_NR 4
-static struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
+static const struct hvm_mmio_handler *const
+hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
{
&hpet_mmio_handler,
&vlapic_mmio_handler,
#include <xen/bitmap.h>
#include <asm/hvm/support.h>
-int hvm_port80_allowed = -1;
+int __read_mostly hvm_port80_allowed = -1;
boolean_param("hvm_port80", hvm_port80_allowed);
static int __init dmi_hvm_deny_port80(/*const*/ struct dmi_system_id *id)
svm_asid_g_invlpg(curr, vaddr);
}
-static struct hvm_function_table svm_function_table = {
+static struct hvm_function_table __read_mostly svm_function_table = {
.name = "SVM",
.cpu_down = svm_cpu_down,
.domain_initialise = svm_domain_initialise,
(addr < vioapic->base_address + VIOAPIC_MEM_LENGTH)));
}
-struct hvm_mmio_handler vioapic_mmio_handler = {
+const struct hvm_mmio_handler vioapic_mmio_handler = {
.check_handler = vioapic_range,
.read_handler = vioapic_read,
.write_handler = vioapic_write
return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
}
-struct hvm_mmio_handler vlapic_mmio_handler = {
+const struct hvm_mmio_handler vlapic_mmio_handler = {
.check_handler = vlapic_range,
.read_handler = vlapic_read,
.write_handler = vlapic_write
return !!virt;
}
-struct hvm_mmio_handler msixtbl_mmio_handler = {
+const struct hvm_mmio_handler msixtbl_mmio_handler = {
.check_handler = msixtbl_range,
.read_handler = msixtbl_read,
.write_handler = msixtbl_write
#include <asm/shadow.h>
#include <asm/tboot.h>
-static int opt_vpid_enabled = 1;
+static int __read_mostly opt_vpid_enabled = 1;
boolean_param("vpid", opt_vpid_enabled);
-static int opt_unrestricted_guest_enabled = 1;
+static int __read_mostly opt_unrestricted_guest_enabled = 1;
boolean_param("unrestricted_guest", opt_unrestricted_guest_enabled);
/*
* Time is measured based on a counter that runs at the same rate as the TSC,
* refer to the SDM, volume 3b, sections 21.6.13 & 22.1.3.
*/
-static unsigned int ple_gap = 41;
+static unsigned int __read_mostly ple_gap = 41;
integer_param("ple_gap", ple_gap);
-static unsigned int ple_window = 4096;
+static unsigned int __read_mostly ple_window = 4096;
integer_param("ple_window", ple_window);
/* Dynamic (run-time adjusted) execution control flags. */
vmx_vmcs_exit(v);
}
-static struct hvm_function_table vmx_function_table = {
+static struct hvm_function_table __read_mostly vmx_function_table = {
.name = "VMX",
.domain_initialise = vmx_domain_initialise,
.domain_destroy = vmx_domain_destroy,
return 0;
}
-static struct microcode_ops microcode_amd_ops = {
+static const struct microcode_ops microcode_amd_ops = {
.microcode_resume_match = microcode_resume_match,
.cpu_request_microcode = cpu_request_microcode,
.collect_cpu_info = collect_cpu_info,
(uci->cpu_sig.rev > nsig->rev));
}
-static struct microcode_ops microcode_intel_ops = {
+static const struct microcode_ops microcode_intel_ops = {
.microcode_resume_match = microcode_resume_match,
.cpu_request_microcode = cpu_request_microcode,
.collect_cpu_info = collect_cpu_info,
container_of(ctxt, struct ptwr_emulate_ctxt, ctxt));
}
-static struct x86_emulate_ops ptwr_emulate_ops = {
+static const struct x86_emulate_ops ptwr_emulate_ops = {
.read = ptwr_emulated_read,
.insn_fetch = ptwr_emulated_read,
.write = ptwr_emulated_write,
}
}
+static const struct paging_mode hap_paging_real_mode;
+static const struct paging_mode hap_paging_protected_mode;
+static const struct paging_mode hap_paging_pae_mode;
+static const struct paging_mode hap_paging_long_mode;
+
void hap_vcpu_init(struct vcpu *v)
{
v->arch.paging.mode = &hap_paging_real_mode;
}
/* Entry points into this mode of the hap code. */
-struct paging_mode hap_paging_real_mode = {
+static const struct paging_mode hap_paging_real_mode = {
.page_fault = hap_page_fault,
.invlpg = hap_invlpg,
.gva_to_gfn = hap_gva_to_gfn_real_mode,
.guest_levels = 1
};
-struct paging_mode hap_paging_protected_mode = {
+static const struct paging_mode hap_paging_protected_mode = {
.page_fault = hap_page_fault,
.invlpg = hap_invlpg,
.gva_to_gfn = hap_gva_to_gfn_2level,
.guest_levels = 2
};
-struct paging_mode hap_paging_pae_mode = {
+static const struct paging_mode hap_paging_pae_mode = {
.page_fault = hap_page_fault,
.invlpg = hap_invlpg,
.gva_to_gfn = hap_gva_to_gfn_3level,
.guest_levels = 3
};
-struct paging_mode hap_paging_long_mode = {
+static const struct paging_mode hap_paging_long_mode = {
.page_fault = hap_page_fault,
.invlpg = hap_invlpg,
.gva_to_gfn = hap_gva_to_gfn_4level,
return X86EMUL_UNHANDLEABLE;
}
-static struct x86_emulate_ops hvm_shadow_emulator_ops = {
+static const struct x86_emulate_ops hvm_shadow_emulator_ops = {
.read = hvm_emulate_read,
.insn_fetch = hvm_emulate_insn_fetch,
.write = hvm_emulate_write,
return X86EMUL_UNHANDLEABLE;
}
-static struct x86_emulate_ops pv_shadow_emulator_ops = {
+static const struct x86_emulate_ops pv_shadow_emulator_ops = {
.read = pv_emulate_read,
.insn_fetch = pv_emulate_read,
.write = pv_emulate_write,
.cmpxchg = pv_emulate_cmpxchg,
};
-struct x86_emulate_ops *shadow_init_emulation(
+const struct x86_emulate_ops *shadow_init_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
{
struct segment_register *creg, *sreg;
static void hash_foreach(struct vcpu *v,
unsigned int callback_mask,
- hash_callback_t callbacks[],
+ const hash_callback_t callbacks[],
mfn_t callback_mfn)
/* Walk the hash table looking at the types of the entries and
* calling the appropriate callback function for each entry.
unsigned long fault_addr)
{
/* Dispatch table for getting per-type functions */
- static hash_callback_t callbacks[SH_type_unused] = {
+ static const hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32 */
int expected_count, do_locking;
/* Dispatch table for getting per-type functions */
- static hash_callback_t callbacks[SH_type_unused] = {
+ static const hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* fl1_32 */
/* Dispatch table for getting per-type functions: each level must
* be called with the function to remove a lower-level shadow. */
- static hash_callback_t callbacks[SH_type_unused] = {
+ static const hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
NULL, /* l1_32 */
NULL, /* fl1_32 */
static void sh_update_paging_modes(struct vcpu *v)
{
struct domain *d = v->domain;
- struct paging_mode *old_mode = v->arch.paging.mode;
+ const struct paging_mode *old_mode = v->arch.paging.mode;
ASSERT(shadow_locked_by_me(d));
void shadow_audit_tables(struct vcpu *v)
{
/* Dispatch table for getting per-type functions */
- static hash_callback_t callbacks[SH_type_unused] = {
+ static const hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2), /* fl1_32 */
shadow_l1e_t sl1e, *ptr_sl1e;
paddr_t gpa;
struct sh_emulate_ctxt emul_ctxt;
- struct x86_emulate_ops *emul_ops;
+ const struct x86_emulate_ops *emul_ops;
int r;
fetch_type_t ft = 0;
p2m_type_t p2mt;
/**************************************************************************/
/* Entry points into this mode of the shadow code.
* This will all be mangled by the preprocessor to uniquify everything. */
-struct paging_mode sh_paging_mode = {
+const struct paging_mode sh_paging_mode = {
.page_fault = sh_page_fault,
.invlpg = sh_invlpg,
.gva_to_gfn = sh_gva_to_gfn,
SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, GUEST_LEVELS)
(struct vcpu *v, mfn_t mmfn);
-extern struct paging_mode
+extern const struct paging_mode
SHADOW_INTERNAL_NAME(sh_paging_mode, GUEST_LEVELS);
#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
#endif
};
-struct x86_emulate_ops *shadow_init_emulation(
+const struct x86_emulate_ops *shadow_init_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
void shadow_continue_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
-nodemask_t node_online_map = { { [0] = 1UL } };
+nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
/* Default NUMA to off for now. acpi=on required to enable it. */
int numa_off __initdata = 1;
struct op_counter_config counter_config[OP_MAX_COUNTER];
-static struct op_x86_model_spec const * model;
+static struct op_x86_model_spec const *__read_mostly model;
static struct op_msrs cpu_msrs[NR_CPUS];
static unsigned long saved_lvtpc[NR_CPUS];
/* p4 event codes in libop/op_event.h are indices into this table. */
-static struct p4_event_binding p4_events[NUM_EVENTS] = {
+static const struct p4_event_binding p4_events[NUM_EVENTS] = {
{ /* BRANCH_RETIRED */
0x05, 0x06,
unsigned int escr = 0;
unsigned int high = 0;
unsigned int counter_bit;
- struct p4_event_binding *ev = NULL;
+ const struct p4_event_binding *ev = NULL;
unsigned int stag;
stag = get_stagger();
op_ppro_spec.num_controls = num_counters;
}
-struct op_x86_model_spec op_ppro_spec = {
+struct op_x86_model_spec __read_mostly op_ppro_spec = {
.num_counters = 2,
.num_controls = 2,
.fill_in_addresses = &ppro_fill_in_addresses,
.save_msr = &ppro_save_msr
};
-struct op_x86_model_spec op_arch_perfmon_spec = {
+struct op_x86_model_spec __read_mostly op_arch_perfmon_spec = {
/* num_counters/num_controls filled in at runtime */
.fill_in_addresses = &ppro_fill_in_addresses,
.setup_ctrs = &ppro_setup_ctrs,
extern struct boot_video_info boot_vid_info;
/* opt_nosmp: If true, secondary processors are ignored. */
-static int opt_nosmp = 0;
+static int __initdata opt_nosmp = 0;
boolean_param("nosmp", opt_nosmp);
/* maxcpus: maximum number of CPUs to activate. */
-static unsigned int max_cpus = NR_CPUS;
+static unsigned int __initdata max_cpus = NR_CPUS;
integer_param("maxcpus", max_cpus);
/* opt_watchdog: If true, run a watchdog NMI on each processor. */
-static int opt_watchdog = 0;
+static int __initdata opt_watchdog = 0;
boolean_param("watchdog", opt_watchdog);
/* opt_tsc_unstable: Override all tests; assume TSC is unreliable. */
int early_boot = 1;
-cpumask_t cpu_present_map;
+cpumask_t __read_mostly cpu_present_map;
-unsigned long xen_phys_start;
+unsigned long __read_mostly xen_phys_start;
#ifdef CONFIG_X86_32
/* Limits of Xen heap, used to initialise the allocator. */
-unsigned long xenheap_initial_phys_start, xenheap_phys_end;
+unsigned long __initdata xenheap_initial_phys_start;
+unsigned long __read_mostly xenheap_phys_end;
#endif
DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, gdt_table) = boot_cpu_gdt_table;
char __attribute__ ((__section__(".bss.stack_aligned"))) cpu0_stack[STACK_SIZE];
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
+struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };
-unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
-EXPORT_SYMBOL(mmu_cr4_features);
+unsigned long __read_mostly mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
-int acpi_disabled;
+int __read_mostly acpi_disabled;
-int acpi_force;
-char acpi_param[10] = "";
+int __read_mostly acpi_force;
+static char __initdata acpi_param[10] = "";
static void __init parse_acpi_param(char *s)
{
/* Save the parameter so it can be propagated to domain0. */
#include <asm/e820.h>
#include <asm/page.h>
-static struct acpi_table_slit *acpi_slit;
+static struct acpi_table_slit *__read_mostly acpi_slit;
static nodemask_t nodes_parsed __initdata;
static nodemask_t nodes_found __initdata;
static struct node nodes[MAX_NUMNODES] __initdata;
-static u8 pxm2node[256] = { [0 ... 255] = 0xff };
+static u8 __read_mostly pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
from BIOS bugs. */
#include <crypto/vmac.h>
/* tboot=<physical address of shared page> */
-static char opt_tboot[20] = "";
+static char __initdata opt_tboot[20] = "";
string_param("tboot", opt_tboot);
/* Global pointer to shared data; NULL means no measured launch. */
static const uuid_t tboot_shared_uuid = TBOOT_SHARED_UUID;
/* used by tboot_protect_mem_regions() and/or tboot_parse_dmar_table() */
-static uint64_t txt_heap_base, txt_heap_size;
-static uint64_t sinit_base, sinit_size;
+static uint64_t __initdata txt_heap_base, __initdata txt_heap_size;
+static uint64_t __initdata sinit_base, __initdata sinit_size;
/*
* TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE)
* ignore: The NMI error is cleared and ignored.
*/
#ifdef NDEBUG
-char opt_nmi[10] = "dom0";
+static char __read_mostly opt_nmi[10] = "dom0";
#else
-char opt_nmi[10] = "fatal";
+static char __read_mostly opt_nmi[10] = "fatal";
#endif
string_param("nmi", opt_nmi);
addl $16,%esp
ret
-.data
+.section .rodata, "a", @progbits
ENTRY(exception_table)
.long do_divide_error
extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES];
-unsigned int PAGE_HYPERVISOR = __PAGE_HYPERVISOR;
-unsigned int PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
+unsigned int __read_mostly PAGE_HYPERVISOR = __PAGE_HYPERVISOR;
+unsigned int __read_mostly PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
-static unsigned long mpt_size;
+static unsigned long __read_mostly mpt_size;
void *alloc_xen_pagetable(void)
{
movl $TRAP_machine_check,4(%rsp)
jmp handle_ist_exception
-.data
+.section .rodata, "a", @progbits
ENTRY(exception_table)
.quad do_divide_error
unsigned long __read_mostly pfn_hole_mask = 0;
unsigned int __read_mostly pfn_pdx_hole_shift = 0;
-unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
+unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
DEFINE_PER_CPU_READ_MOSTLY(void *, compat_arg_xlat);
unsigned long *val,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
*val = 0;
return ops->read(seg, offset, val, bytes, ctxt);
static int
get_cpl(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
struct segment_register reg;
static int
_mode_iopl(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
int cpl = get_cpl(ctxt, ops);
if ( cpl == -1 )
unsigned int first_port,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
unsigned long iobmp;
struct segment_register tr;
static int
in_realmode(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
unsigned long cr0;
int rc;
static int
in_protmode(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
return !(in_realmode(ctxt, ops) || (ctxt->regs->eflags & EFLG_VM));
}
static int
in_longmode(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
uint64_t efer;
enum x86_segment seg,
uint16_t sel,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
struct segment_register reg;
int rc;
enum x86_segment seg,
uint16_t sel,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
struct segment_register desctab, ss, segr;
struct { uint32_t a, b; } desc;
enum x86_segment seg,
uint16_t sel,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
if ( (ops->read_segment == NULL) ||
(ops->write_segment == NULL) )
int
x86_emulate(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
/* Shadow copy of register state. Committed on successful emulation. */
struct cpu_user_regs _regs = *ctxt->regs;
int
x86_emulate(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops);
+ const struct x86_emulate_ops *ops);
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
#include <asm/byteorder.h>
/* for ctype.h */
-unsigned char _ctype[] = {
+const unsigned char _ctype[] = {
_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
/* ------------------------------------------------------------------------ */
/* xen features */
-const char *elf_xen_feature_names[] = {
+static const char *const elf_xen_feature_names[] = {
[XENFEAT_writable_page_tables] = "writable_page_tables",
[XENFEAT_writable_descriptor_tables] = "writable_descriptor_tables",
[XENFEAT_auto_translated_physmap] = "auto_translated_physmap",
[XENFEAT_supervisor_mode_kernel] = "supervisor_mode_kernel",
[XENFEAT_pae_pgdir_above_4gb] = "pae_pgdir_above_4gb"
};
-const int elf_xen_features =
+static const int elf_xen_features =
sizeof(elf_xen_feature_names) / sizeof(elf_xen_feature_names[0]);
int elf_xen_parse_features(const char *features,
- now % MILLISECS(CSCHED_MSECS_PER_TICK) );
}
-struct scheduler sched_credit_def = {
+const struct scheduler sched_credit_def = {
.name = "SMP Credit Scheduler",
.opt_name = "credit",
.sched_id = XEN_SCHEDULER_CREDIT,
return 0;
}
-struct scheduler sched_sedf_def = {
+const struct scheduler sched_sedf_def = {
.name = "Simple EDF Scheduler",
.opt_name = "sedf",
.sched_id = XEN_SCHEDULER_SEDF,
/* This is global for now so that private implementations can reach it */
DEFINE_PER_CPU(struct schedule_data, schedule_data);
-extern struct scheduler sched_sedf_def;
-extern struct scheduler sched_credit_def;
-static struct scheduler *schedulers[] = {
+extern const struct scheduler sched_sedf_def;
+extern const struct scheduler sched_credit_def;
+static const struct scheduler *__initdata schedulers[] = {
&sched_sedf_def,
&sched_credit_def,
NULL
};
-static struct scheduler ops;
+static struct scheduler __read_mostly ops;
#define SCHED_OP(fn, ...) \
(( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ ) \
/* Char 1: CTRL+<char1> is used to switch console input between Xen and DOM0 */
/* Char 2: If this character is 'x', then do not auto-switch to DOM0 when it */
/* boots. Any other value, or omitting the char, enables auto-switch */
-static unsigned char opt_conswitch[3] = "a";
+static unsigned char __read_mostly opt_conswitch[3] = "a";
string_param("conswitch", opt_conswitch);
/* sync_console: force synchronous console output (useful for debugging). */
-static int opt_sync_console;
+static int __read_mostly opt_sync_console;
boolean_param("sync_console", opt_sync_console);
/* console_to_ring: send guest (incl. dom 0) console data to console ring. */
-static int opt_console_to_ring;
+static int __read_mostly opt_console_to_ring;
boolean_param("console_to_ring", opt_console_to_ring);
/* console_timestamps: include a timestamp prefix on every Xen console line. */
-static int opt_console_timestamps;
+static int __read_mostly opt_console_timestamps;
boolean_param("console_timestamps", opt_console_timestamps);
/* conring_size: allows a larger console ring than the default (16kB). */
#define _CONRING_SIZE 16384
#define CONRING_IDX_MASK(i) ((i)&(conring_size-1))
-static char _conring[_CONRING_SIZE], *conring = _conring;
-static uint32_t conring_size = _CONRING_SIZE, conringc, conringp;
+static char _conring[_CONRING_SIZE], *__read_mostly conring = _conring;
+static uint32_t __read_mostly conring_size = _CONRING_SIZE;
+static uint32_t conringc, conringp;
-static int sercon_handle = -1;
+static int __read_mostly sercon_handle = -1;
static DEFINE_SPINLOCK(console_lock);
#define XENLOG_DEFAULT 1 /* XENLOG_WARNING */
#define XENLOG_GUEST_DEFAULT 1 /* XENLOG_WARNING */
-static int xenlog_upper_thresh = XENLOG_UPPER_THRESHOLD;
-static int xenlog_lower_thresh = XENLOG_LOWER_THRESHOLD;
-static int xenlog_guest_upper_thresh = XENLOG_GUEST_UPPER_THRESHOLD;
-static int xenlog_guest_lower_thresh = XENLOG_GUEST_LOWER_THRESHOLD;
+static int __read_mostly xenlog_upper_thresh = XENLOG_UPPER_THRESHOLD;
+static int __read_mostly xenlog_lower_thresh = XENLOG_LOWER_THRESHOLD;
+static int __read_mostly xenlog_guest_upper_thresh = XENLOG_GUEST_UPPER_THRESHOLD;
+static int __read_mostly xenlog_guest_lower_thresh = XENLOG_GUEST_LOWER_THRESHOLD;
static void parse_loglvl(char *s);
static void parse_guest_loglvl(char *s);
/* CTRL-<switch_char> switches input direction between Xen and DOM0. */
#define switch_code (opt_conswitch[0]-'a'+1)
-static int xen_rx = 1; /* FALSE => serial input passed to domain 0. */
+static int __read_mostly xen_rx = 1; /* FALSE => serial input passed to domain 0. */
static void switch_serial_input(void)
{
}
/* minimum time in ms between messages */
-int printk_ratelimit_ms = 5 * 1000;
+static int __read_mostly printk_ratelimit_ms = 5 * 1000;
/* number of messages we send before ratelimiting */
-int printk_ratelimit_burst = 10;
+static int __read_mostly printk_ratelimit_burst = 10;
int printk_ratelimit(void)
{
return ((uart->irq > 0) ? uart->irq : -1);
}
-static struct uart_driver ns16550_driver = {
+static struct uart_driver __read_mostly ns16550_driver = {
.init_preirq = ns16550_init_preirq,
.init_postirq = ns16550_init_postirq,
.endboot = ns16550_endboot,
/* Never drop characters, even if the async transmit buffer fills. */
/* #define SERIAL_NEVER_DROP_CHARS 1 */
-unsigned int serial_txbufsz = 16384;
+unsigned int __read_mostly serial_txbufsz = 16384;
size_param("serial_tx_buffer", serial_txbufsz);
#define mask_serial_rxbuf_idx(_i) ((_i)&(serial_rxbufsz-1))
return rt;
}
-struct iommu_ops amd_iommu_ops = {
+const struct iommu_ops amd_iommu_ops = {
.init = amd_iommu_domain_init,
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
int group_id, sdev_id;
u32 bdf;
int i = 0;
- struct iommu_ops *ops = hd->platform_ops;
+ const struct iommu_ops *ops = hd->platform_ops;
if ( !iommu_enabled || !ops || !ops->get_device_group_id )
return 0;
void iommu_update_ire_from_apic(
unsigned int apic, unsigned int reg, unsigned int value)
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
ops->update_ire_from_apic(apic, reg, value);
}
void iommu_update_ire_from_msi(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
ops->update_ire_from_msi(msi_desc, msg);
}
void iommu_read_msi_from_ire(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
ops->read_msi_from_ire(msi_desc, msg);
}
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
return ops->read_apic_from_ire(apic, reg);
}
void iommu_resume()
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
if ( iommu_enabled )
ops->resume();
}
void iommu_suspend()
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
if ( iommu_enabled )
ops->suspend();
}
}
}
-struct iommu_ops intel_iommu_ops = {
+const struct iommu_ops intel_iommu_ops = {
.init = intel_iommu_domain_init,
.add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device,
#include <asm/iosapic.h>
struct iommu_ops;
-extern struct iommu_ops intel_iommu_ops;
+extern const struct iommu_ops intel_iommu_ops;
extern int intel_vtd_setup(void);
#define iommu_get_ops() (&intel_iommu_ops)
struct paging_vcpu {
/* Pointers to mode-specific entry points. */
- struct paging_mode *mode;
+ const struct paging_mode *mode;
/* HVM guest: last emulate was to a pagetable */
unsigned int last_write_was_pt:1;
/* HVM guest: last write emulation succeeds */
struct mpc_config_processor;
struct genapic {
- char *name;
+ const char *name;
int (*probe)(void);
/* When one of the next two hooks returns 1 the genapic
APICFUNC(mps_oem_check), \
APICFUNC(acpi_madt_oem_check)
-extern struct genapic *genapic;
-extern struct genapic apic_x2apic_phys;
-extern struct genapic apic_x2apic_cluster;
+extern const struct genapic *genapic;
+extern const struct genapic apic_x2apic_phys;
+extern const struct genapic apic_x2apic_cluster;
void init_apic_ldr_flat(void);
void clustered_apic_check_flat(void);
unsigned long nr,
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
-extern struct paging_mode hap_paging_real_mode;
-extern struct paging_mode hap_paging_protected_mode;
-extern struct paging_mode hap_paging_pae_mode;
-extern struct paging_mode hap_paging_long_mode;
-
#endif /* XEN_HAP_H */
/*
#define __ASM_X86_HVM_IOMMU_H__
struct iommu_ops;
-extern struct iommu_ops intel_iommu_ops;
-extern struct iommu_ops amd_iommu_ops;
+extern const struct iommu_ops intel_iommu_ops;
+extern const struct iommu_ops amd_iommu_ops;
extern int intel_vtd_setup(void);
extern int amd_iov_detect(void);
-static inline struct iommu_ops *iommu_get_ops(void)
+static inline const struct iommu_ops *iommu_get_ops(void)
{
switch ( boot_cpu_data.x86_vendor )
{
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
-extern unsigned char _ctype[];
+extern const unsigned char _ctype[];
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
struct page_info *root_table;
/* iommu_ops */
- struct iommu_ops *platform_ops;
+ const struct iommu_ops *platform_ops;
};
#endif /* __XEN_HVM_IOMMU_H__ */